Package installation

Note: my local evalcast and covidcast installations may be slightly ahead of the remote versions due to outstanding PRs.

# Install the Delphi forecasting stack from GitHub:
# covidcast (data access), evalcast (from the killcards branch),
# quantgen (quantile regression), and modeltools (forecaster utilities).
remotes::install_github("cmu-delphi/covidcast", ref = "main",
                        subdir = "R-packages/covidcast")
remotes::install_github("cmu-delphi/covidcast", ref = "evalcast-killcards",
                        subdir = "R-packages/evalcast")
remotes::install_github(repo = "ryantibs/quantgen",
                        subdir = "R-package/quantgen")
remotes::install_github("cmu-delphi/covidcast", ref = "main",
                        subdir = "R-packages/modeltools")

Produce forecasts

Current model is quantile regression with 3 lags of the response (deaths) and 3 lags of cases. This is at the state level.

# Quantile autoregression with 3 lags, or QAR3
 
corrector <- zookeeper::make_aardvark_corrector()

# The three QAR3 variants below are identical except for the forecaster's
# name and its featurization function, so the shared get_predictions() call
# is factored into one helper instead of being triplicated.
# All other arguments (data_source, data_signals, forecast_dates, n, lags,
# sort, nonneg, ...) are picked up from the surrounding environment.
run_qar3 <- function(forecaster_name, featurize_fun) {
  get_predictions(
    forecaster = quantgen_forecaster,
    name_of_forecaster = forecaster_name,
    signals = tibble::tibble(
      data_source = data_source,
      signal = data_signals,
      start_day = list(start_day_quantgen)),
    forecast_dates = forecast_dates,
    # Treat every issue as "as of" today, i.e. use finalized data.
    as_of_override = function(x) lubridate::ymd(Sys.Date()),
    incidence_period = incidence_period,
    ahead = ahead,
    geo_type = "state",
    signal_aggregation = "list",
    geo_values = "*",
    apply_corrections = corrector,
    featurize = featurize_fun,
    n = n,
    lags = lags,  # optionally use a list for different lags per signal
    lambda = 0,   # lambda = 0: plain (unpenalized) quantile regression
    sort = sort,
    nonneg = nonneg)
}

bonehead <- run_qar3("bonehead", sum_to_epiweek)
middling <- run_qar3("middling", grab_epiweek)
lots <- run_qar3("lots_of_data", dav_maker)
library(dplyr)   # mutate(), filter(), bind_rows(), %>% used below come from dplyr
library(tidyr)
library(purrr)
library(ggplot2)
theme_set(theme_bw())

# Comparison set: the hub ensemble and baseline, our production submission,
# and Karlen-pypm.
competition <- c("COVIDhub-ensemble", "COVIDhub-baseline",
                 "CMU-TimeSeries", "Karlen-pypm")
submitted <- lapply(competition[1:3], get_covidhub_predictions,
                    forecast_dates = forecast_dates,
                    signal = "deaths_incidence_num")
# Karlen-pypm submits on the preceding day; pull its forecasts one day
# earlier and shift forecast_date forward so it aligns with the others.
submitted[[4]] <- get_covidhub_predictions("Karlen-pypm",
                                           forecast_dates = forecast_dates - 1,
                                           signal = "deaths_incidence_num") %>%
  mutate(forecast_date = forecast_date + 1)
# Only compare the 1-4 week ahead targets.
submitted <- bind_rows(submitted) %>% filter(ahead < 5)


# Some fixes to make comparable
# Make our forecasts comparable to the hub submissions: quantile levels as
# numeric, epiweek incidence period, and daily aheads mapped to week numbers.
qar_dc <- bonehead %>%
  bind_rows(middling, lots) %>%
  mutate(
    quantile = as.numeric(quantile),
    incidence_period = "epiweek",
    ahead = ahead %/% 7 + 1
  )

# Score all forecasters together, then drop the territories and the national
# aggregate so everyone is evaluated on the same set of states.
excluded_geos <- c("as", "gu", "vi", "mp", "us")
results <- bind_rows(qar_dc, submitted) %>%
  evaluate_predictions(backfill_buffer = 0, geo_type = "state") %>%
  filter(!(geo_value %in% excluded_geos))

Overall AE, WIS, Coverage 80

We compare the new forecaster to: COVIDhub-baseline, COVIDhub-ensemble, our submission (CMU-TimeSeries), and Karlen-pypm (the top model over the last 3 months).

Top line conclusions:

  1. Overall performance approaches the ensemble.
  2. By WIS, we are the top model for 1 week ahead, and still competitive at 2-4 weeks ahead.
  3. Coverage is very good.
# Shared subtitle for all plots: the span of forecast dates evaluated.
date_span <- range(forecast_dates)
subtitle <- sprintf("Forecasts made over %s to %s",
                    format(date_span[1], "%B %d"),
                    format(date_span[2], "%B %d"))

# Mean AE by weeks ahead, log scale.
# NOTE: labs() takes aesthetic names (x =, y =); the original xlab=/ylab=
# arguments are not aesthetics and were silently ignored by ggplot2.
plot_canonical(results, x = "ahead", y = "ae", aggr = mean) +
  labs(subtitle = subtitle, x = "Weeks ahead", y = "Mean AE") +
  theme(legend.position = "bottom") +
  scale_y_log10()

# Mean WIS by weeks ahead, log scale.
plot_canonical(results, x = "ahead", y = "wis", aggr = mean) +
  labs(subtitle = subtitle, x = "Weeks ahead", y = "Mean WIS") +
  theme(legend.position = "bottom") +
  scale_y_log10()

# Mean 80% interval coverage by weeks ahead; horizontal line marks nominal 0.8.
plot_canonical(results, x = "ahead", y = "coverage_80", aggr = mean) +
  labs(subtitle = subtitle, x = "Weeks ahead", y = "Mean Coverage") +
  theme(legend.position = "bottom") +
  coord_cartesian(ylim = c(0, 1)) + geom_hline(yintercept = .8, color = "black")

AE, WIS, and coverage by forecast date

Top line conclusions:

  1. We crush it until December.
  2. Quite a bit worse in Dec/January, though better than aardvark.
  3. Coverage is generally good at all aheads.
theme_set(theme_bw())  # redundant with the earlier theme_set(); harmless

# Mean AE by forecast date, faceted by ahead, log scale.
# NOTE: labs() takes aesthetic names (x =, y =); the original xlab=/ylab=
# arguments are not aesthetics and were silently ignored by ggplot2.
plot_canonical(results, x = "forecast_date", y = "ae", aggr = mean,
               grp_vars = c("forecaster", "ahead"), facet_rows = "ahead") +
  labs(subtitle = subtitle, x = "Forecast date", y = "Mean AE") +
  theme(legend.position = "bottom") +
  scale_y_log10()

# Mean WIS by forecast date, faceted by ahead, log scale.
plot_canonical(results, x = "forecast_date", y = "wis", aggr = mean,
               grp_vars = c("forecaster", "ahead"), facet_rows = "ahead") +
  labs(subtitle = subtitle, x = "Forecast date", y = "Mean WIS") +
  theme(legend.position = "bottom") +
  scale_y_log10()

# Mean 80% coverage by forecast date; line marks nominal 0.8.
plot_canonical(results, x = "forecast_date", y = "coverage_80", aggr = mean,
               grp_vars = c("forecaster", "ahead"), facet_rows = "ahead") +
  labs(subtitle = subtitle, x = "Forecast date", y = "Mean Coverage") +
  theme(legend.position = "bottom") +
  coord_cartesian(ylim = c(0, 1)) + geom_hline(yintercept = .8, color = "black")

Median relative WIS

Relative to baseline; scale first then take the median.

# Median WIS relative to the hub baseline (scale first, then aggregate);
# horizontal line at 1 marks parity with the baseline.
# NOTE: labs() takes aesthetic names (x =, y =); the original xlab=/ylab=
# arguments are not aesthetics and were silently ignored by ggplot2.
plot_canonical(results, x = "ahead", y = "wis", aggr = median,
               base_forecaster = "COVIDhub-baseline", scale_before_aggr = TRUE) +
  labs(subtitle = subtitle, x = "Weeks ahead", y = "Median relative WIS") +
  theme(legend.position = "bottom") +
  geom_hline(yintercept = 1)

# Same, but by forecast date and faceted by ahead.
plot_canonical(results, x = "forecast_date", y = "wis", aggr = median,
               grp_vars = c("forecaster", "ahead"), facet_rows = "ahead",
               base_forecaster = "COVIDhub-baseline", scale_before_aggr = TRUE) +
  labs(subtitle = subtitle, x = "Forecast date", y = "Median relative WIS") +
  theme(legend.position = "bottom") +
  geom_hline(yintercept = 1)

(Geometric) Mean relative WIS

Relative to baseline; scale first then take the geometric mean, ignoring a few 0’s. I think this is potentially more useful than the median/mean for relative WIS (or relative AE), but I haven’t completely thought it through. Putting the results here to be provocative.

# Geometric mean of a numeric vector of positive values.
# Computed in log space: prod(x)^(1/length(x)) overflows to Inf (or
# underflows to 0) for long vectors or large values, while
# exp(mean(log(x))) is numerically stable.
geom_mean <- function(x) {
  if (length(x) == 0) {
    return(1)  # matches prod(numeric(0))^(1/0) == 1^Inf == 1
  }
  exp(mean(log(x)))
}
# Geometric mean WIS relative to the baseline; zero-WIS rows are dropped
# since log(0) would poison the geometric mean.
# NOTE: labs() takes aesthetic names (x =, y =); the original xlab=/ylab=
# arguments are not aesthetics and were silently ignored by ggplot2.
plot_canonical(results %>% filter(wis > 0), x = "ahead", y = "wis",
               aggr = geom_mean,
               base_forecaster = "COVIDhub-baseline", scale_before_aggr = TRUE) +
  labs(subtitle = subtitle,
       x = "Weeks ahead", y = "Mean (geometric) relative WIS") +
  theme(legend.position = "bottom") +
  geom_hline(yintercept = 1)

# Same, but by forecast date and faceted by ahead.
plot_canonical(results %>% filter(wis > 0), x = "forecast_date", y = "wis",
               aggr = geom_mean, facet_rows = "ahead",
               grp_vars = c("forecaster", "ahead"),
               base_forecaster = "COVIDhub-baseline", scale_before_aggr = TRUE) +
  theme(legend.position = "bottom") +
  labs(subtitle = subtitle,
       x = "Forecast date", y = "Mean (geometric) relative WIS") +
  geom_hline(yintercept = 1)

Scores by target date (not forecast date)

# Mean WIS by target end date (scores indexed by when the outcome occurred,
# not when the forecast was made), log scale.
# NOTE: labs() takes aesthetic names (x =, y =); the original xlab=/ylab=
# arguments are not aesthetics and were silently ignored by ggplot2.
plot_canonical(results, x = "target_end_date", y = "wis", aggr = mean,
               dots = TRUE, grp_vars = "forecaster") +
  labs(subtitle = subtitle, x = "Target date", y = "Mean WIS") +
  theme(legend.position = "bottom") +
  scale_y_log10()

# Same, additionally split and faceted by ahead.
plot_canonical(results, x = "target_end_date", y = "wis", aggr = mean,
               dots = TRUE, grp_vars = c("forecaster", "ahead"),
               facet_rows = "ahead") +
  labs(subtitle = subtitle, x = "Target date", y = "Mean WIS") +
  theme(legend.position = "bottom") +
  scale_y_log10()

Trajectory plots

# Prepare trajectory data: truth series plus 80% prediction intervals,
# with truth starting 60 days before the first forecast date.
# NOTE(review): setup_plot_trajectory() is an unexported evalcast internal
# (:::), so this may break across evalcast versions -- consider pinning.
pd <- evalcast:::setup_plot_trajectory(qar_dc, intervals = 0.8,
                                       geo_type = "state", 
                                       start_day = min(forecast_dates) - 60)

# Base plot: x axis is the target end date shared by all layers.
g <- ggplot(pd$truth_df, mapping = aes(x = target_end_date))

# build the fan: one translucent 80% ribbon per (forecaster, forecast_date)
g <- g + geom_ribbon(
  data = pd$quantiles_df,
  mapping = aes(ymin = lower, ymax = upper, fill = forecaster, 
                group = interaction(forecaster,forecast_date)),
  alpha = .1) +
  scale_fill_viridis_d()

# line layer: reported truth (black) plus one point-forecast trajectory
# per (forecaster, forecast_date)
g <- g +
  #geom_line(aes(y = .data$value.y), color = "#3182BD") + # corrected
  geom_line(aes(y = value)) + # reported
  geom_line(data = pd$points_df, 
            mapping = aes(y = value, color = forecaster, 
                          group = interaction(forecaster,forecast_date)),
            size = 1) +
  geom_point(aes(y = value)) + # reported gets dots
  geom_point(data = pd$points_df, 
             mapping = aes(y = value, color = forecaster),
             size = 3) +
  scale_color_viridis_d()

# One panel per state, free y scales since death counts vary by orders
# of magnitude across states.
g + theme_bw(base_size = 20) + 
  facet_wrap(~geo_value, scales = "free_y", ncol = 5) +
  theme(legend.position = "top") + ylab("") + xlab("")